#import cv2
import numpy as np
from tensorflow.keras.models import Model, load_model
import tensorflow as tf
from tensorflow import keras
#from IPython.display import Image, display
from tensorflow.keras.applications.inception_v3 import InceptionV3, decode_predictions, preprocess_input
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import load_img
#from PIL import Image
import sys
# Make the local QXP package importable (expects a QXP/ directory next to this script).
sys.path.append('QXP/')
from ScoreCAM_QXP import ScoreCAM_QXP
from computeEvalX import EvalX
# Print library/interpreter versions for reproducibility of the results below.
print(tf.keras.__version__)
print(tf.__version__)
print(np.__version__)
print(sys.version)
# Output: 2.4.0 2.4.1 1.19.5 3.8.5 (default, Sep 4 2020, 02:22:02) [Clang 10.0.0 ]
# Reset Keras global state so auto-generated layer names (e.g. 'conv2d_93' below)
# are deterministic across runs.
tf.keras.backend.clear_session()
# Pre-trained InceptionV3 ImageNet classifier, including the 1000-way softmax head.
model = InceptionV3(weights='imagenet', include_top=True)
#model.summary()
import urllib.request

# Download the ImageNet class labels file
url = 'https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt'
filename = 'imagenet_classes.txt'
urllib.request.urlretrieve(url, filename)
# Load the class labels from the file: one label per line, so the list index
# is the ImageNet class id.
with open(filename, 'r') as f:
    labels = f.read().splitlines()
# Print the class labels
#print(labels)
# Hard-coded local path to the example image to be explained.
path = '/Users/air/Downloads/DeepAI_images/example_explaining_qxp.jpg'
# Resize on load to InceptionV3's expected 299x299 input resolution.
base_img = load_img(path, target_size=(299,299))
base_img = np.array(base_img)
#Visualizing
plt.imshow(base_img)
plt.axis('off')
plt.tight_layout()
plt.show()
# Scale pixel values to InceptionV3's expected input range, then add a batch axis.
img_tensor = preprocess_input(base_img)
img_tensor = tf.expand_dims(img_tensor, axis=0)
#plt.imshow(img_tensor[0])
# Forward pass in inference mode (training=False disables dropout/BN updates).
predictions = model(img_tensor, training=False)
print(predictions.shape)
# Map the 1000-way output to human-readable (class_id, label, score) triples.
decoded_predictions = decode_predictions(predictions.numpy(), top=5)[0]
# Print the top 5 predictions
for _, label, confidence in decoded_predictions:
    print(f"{label}: {confidence}")
# Output: (1, 1000)
# violin: 0.6028324961662292  home_theater: 0.12355843931436539  cello: 0.10592108964920044
# entertainment_center: 0.017840880900621414  acoustic_guitar: 0.00532774580642581
# Class index of 'violin' in the downloaded label list (the top-1 prediction above).
score_1 = labels.index('violin')
print(score_1)
# Output: 889
# Build the Score-CAM explainer on InceptionV3's last convolutional layer.
qxp = ScoreCAM_QXP(model, penultimate_layer='conv2d_93', threshold=None)
# qxp_info holds the saliency map(s) for the requested class; masked holds the
# masked input images the method generates internally.
qxp_info, masked = qxp(img_tensor, scores=[score_1], quantitative_explanation = True, labels=labels, activation_modifier = True)
preds = model.predict(img_tensor)
# Class indices sorted by descending probability, per image in the batch.
sorted_indices = np.argsort(-preds)
for pred in sorted_indices:
    print(" ".join("{},".format(labels[i]) for i in pred[:10]), end="\n")
# Show Score-CAM results
plt.imshow(base_img)
plt.imshow(qxp_info[0], cmap='jet', alpha=0.5)
plt.axis('off')
plt.tight_layout()
plt.show()
# Output: violin, home theater, cello, entertainment center, acoustic guitar, television, loudspeaker, stage, upright, radio,
# Classify the first N masked images produced by the explainer and show them
# side by side.
N = 7
masked_subset = masked[0][:N]  # hoist the repeated slice
print(tf.argmax(model.predict(masked_subset), axis=-1))
f, ax = plt.subplots(nrows=1, ncols=len(masked_subset), figsize=(12, 4))
for i in range(len(masked_subset)):
    ax[i].imshow(masked_subset[i])
    ax[i].axis('off')
plt.tight_layout()
plt.show()
# Output: (x7) Clipping input data to the valid range for imshow with RGB data
#         ([0..1] for floats or [0..255] for integers).
# Output: tf.Tensor([598 889 889 889 889 889 889], shape=(7,), dtype=int64)
# Quantitatively evaluate the explanation against a one-hot 'violin' ground truth.
ground_truths = tf.expand_dims(tf.keras.utils.to_categorical(score_1, 1000), axis=0).numpy()
model_eval = EvalX()(img_tensor, model, penultimate_layer='conv2d_93', threshold=None, ground_truths=ground_truths)
avg_kl, avg_jd, avg_qxp = model_eval
print('Avg. Kullback–Leibler divergence:', avg_kl)
print('Avg. Jaccard Distance:', avg_jd)
print('Avg. QXP predictions:', avg_qxp)
# Output: Avg. Kullback–Leibler divergence: [0.7688590022956416]
#         Avg. Jaccard Distance: [0.0]  Avg. QXP predictions: [3]
# Repeat the explanation for the 'cello' class (3rd-ranked prediction).
score_2 = labels.index('cello')
qxp_info, _ = qxp(img_tensor, scores=[score_2], quantitative_explanation = True, labels=labels, activation_modifier = True)
preds = model.predict(img_tensor)
sorted_indices = np.argsort(-preds)
for pred in sorted_indices:
    print(" ".join("{},".format(labels[i]) for i in pred[:10]), end="\n")
# Show Score-CAM results
plt.imshow(base_img)
plt.imshow(qxp_info[0], cmap='jet', alpha=0.5)
plt.axis('off')
plt.tight_layout()
plt.show()
# Output: violin, home theater, cello, entertainment center, acoustic guitar, television, loudspeaker, stage, upright, radio,
# Repeat the explanation for the 'home theater' class (2nd-ranked prediction).
score_3 = labels.index('home theater')
qxp_info, _ = qxp(img_tensor, scores=[score_3], quantitative_explanation = True, labels=labels, activation_modifier = True)
preds = model.predict(img_tensor)
sorted_indices = np.argsort(-preds)
for pred in sorted_indices:
    print(" ".join("{},".format(labels[i]) for i in pred[:10]), end="\n")
# Show Score-CAM results
plt.imshow(base_img)
plt.imshow(qxp_info[0], cmap='jet', alpha=0.5)
plt.axis('off')
plt.tight_layout()
plt.show()
# Output: violin, home theater, cello, entertainment center, acoustic guitar, television, loudspeaker, stage, upright, radio,
# Repeat the explanation for the 'acoustic guitar' class (5th-ranked prediction).
score_4 = labels.index('acoustic guitar')
qxp_info, _ = qxp(img_tensor, scores=[score_4], quantitative_explanation = True, labels=labels, activation_modifier = True)
preds = model.predict(img_tensor)
sorted_indices = np.argsort(-preds)
for pred in sorted_indices:
    print(" ".join("{},".format(labels[i]) for i in pred[:10]), end="\n")
# Show Score-CAM results
plt.imshow(base_img)
plt.imshow(qxp_info[0], cmap='jet', alpha=0.5)
plt.axis('off')
plt.tight_layout()
plt.show()
# Output: violin, home theater, cello, entertainment center, acoustic guitar, television, loudspeaker, stage, upright, radio,